1. Import Data and Library

library(Seurat)
## Attaching SeuratObject
library(SeuratData)
## Registered S3 method overwritten by 'cli':
##   method     from         
##   print.boxx spatstat.geom
## ── Installed datasets ───────────────────────────────────── SeuratData v0.2.1 ──
## ✓ bmcite       0.3.0                    ✓ pbmcMultiome 0.1.0
## ────────────────────────────────────── Key ─────────────────────────────────────
## ✓ Dataset loaded successfully
## > Dataset built with a newer version of Seurat than installed
## ❓ Unknown version of Seurat installed
library(cowplot)
library(dplyr)
## 
## Attaching package: 'dplyr'
## The following objects are masked from 'package:stats':
## 
##     filter, lag
## The following objects are masked from 'package:base':
## 
##     intersect, setdiff, setequal, union
# Just an example way to preprocess data, to demonstrate the PCA part
# # You could use SCTransform into RunPCA also
# bm <- LoadData(ds = "bmcite")
# bm <- Seurat::NormalizeData(bm) %>% Seurat::FindVariableFeatures() %>% Seurat::ScaleData() %>% Seurat::RunPCA()
# store_date <- date()
# save(bm, pca_mat, cell_type, sub_dat, sub_celltype, cormat_list, store_date, file = "PCA_seurat2.Rdata")
load("PCA_seurat2.Rdata")
# Integer codes (1, 2, ...) for the level-2 cell-type annotation;
# used below as point colors in base-graphics scatter plots.
celltype_factor <- as.numeric(as.factor(bm@meta.data$celltype.l2))
# scale_mat <- t(bm[["RNA"]]@scale.data[bm[["RNA"]]@var.features,])
# dim(scale_mat); round(scale_mat[1:5,1:5],2)
# idx1 <- which(colnames(scale_mat) == "IGHA1")
# idx2 <- which(colnames(scale_mat) == "GYPE")
# plot(scale_mat[,idx1], scale_mat[,idx2], asp = T, col = celltype_factor,
#      xlab = colnames(scale_mat)[idx1],
#      ylab = colnames(scale_mat)[idx2])

# This is what plotting the scale.data matrix looks like
# It still looks like it's very aligned with the axes 
pca_obj <- bm[["pca"]]
dim(pca_obj@cell.embeddings)
## [1] 30672    50
dim(pca_obj@feature.loadings)
## [1] 2000   50
# Rank-50 reconstruction of the scaled expression matrix:
# (cells x 50 embeddings) %*% t(genes x 50 loadings) -> cells x genes.
pca_mat <- pca_obj@cell.embeddings %*% t(pca_obj@feature.loadings)
dim(pca_mat); round(pca_mat[1:5,1:5],2)
## [1] 30672  2000
##                       IGKC  HBA2   HBB  HBA1 IGHA1
## a_AAACCTGAGCTTATCG-1 -0.82 -0.68 -0.75 -0.81 -0.13
## a_AAACCTGAGGTGGGTT-1  0.10  0.06 -0.08 -0.01 -1.24
## a_AAACCTGAGTACATGA-1 -0.36  0.06  0.18  0.08 -0.17
## a_AAACCTGCAAACCTAC-1 -0.21  0.04  0.05  0.04 -0.34
## a_AAACCTGCAAGGTGTG-1 -0.31  0.09  0.10 -0.03 -0.59
idx1 <- which(colnames(pca_mat) == "IGHA1")
idx2 <- which(colnames(pca_mat) == "GYPE")
# Scatter plot of two example genes in the low-rank matrix, colored by cell type.
plot(pca_mat[,idx1], pca_mat[,idx2], asp = T, col = celltype_factor,
     xlab = colnames(pca_mat)[idx1],
     ylab = colnames(pca_mat)[idx2])

# By plotting the low-rank matrix, some of the structure is more visible
# You can verify that pca_mat is indeed low-rank by running
# Matrix::rankMatrix(pca_mat) 
# (it should return 50), but this line takes a few minutes to run
cell_labels <- unique(bm@meta.data$celltype.l2)
cell_type <- bm@meta.data$celltype.l2
# rand_ind <- c()
# 
# for (cell in cell_labels){
#   set.seed(10)
#   subcell_ind <- which(cell_type == cell)
#   subcell_len <- length(subcell_ind)
#   subcell_mat <- pca_mat[subcell_ind, ]
# 
#   row_ind <- apply(subcell_mat, 1, function(x){length(which(x != 0))})
#   idx <- order(row_ind, decreasing = T)
#   sub_rand <- sample(length(subcell_ind),
#                      length(subcell_ind)/40)
#   rand_ind <- c(rand_ind, idx[1:(subcell_len/30)])
# }

# rand_ind <- c()
# for (cell in cell_labels){
#   set.seed(123)
#   subcell_ind <- which(cell_type == cell)
#   sub_rand <- sample(length(subcell_ind),
#                      length(subcell_ind)/40)
#   rand_ind <- c(rand_ind, subcell_ind[sub_rand])
# }

# sub_dat <- pca_mat[rand_ind, ]
# 
# col_ind <- apply(sub_dat, 2, function(x){length(which(x != 0))})
# idx <- order(col_ind, decreasing = T)[1:500]
# 
# sub_dat <- sub_dat[, idx]
# 
# dat_hclust <- hclust(dist(t(sub_dat)))
# dat_index <- dat_hclust$order
# 
# sub_dat <- sub_dat[, dat_index]
# sub_celltype <- cell_type[rand_ind]
# Integer cluster codes for the subsampled cells (sub_celltype is loaded
# from PCA_seurat2.Rdata); used as point colors in the plots below.
sub_cluster_labels <- as.numeric(as.factor(sub_celltype))

dim(sub_dat)
## [1] 1010  500

2-1. Dependency Measures

library(reshape2) # melt function
library(ggplot2) # ggplot function
library(pcaPP) # Fast Kendall function
library(energy) # Distance Correlation
library(Hmisc) # Hoeffding's D measure
## Loading required package: lattice
## Loading required package: survival
## Loading required package: Formula
## 
## Attaching package: 'Hmisc'
## The following objects are masked from 'package:dplyr':
## 
##     src, summarize
## The following object is masked from 'package:SeuratObject':
## 
##     Key
## The following object is masked from 'package:Seurat':
## 
##     Key
## The following objects are masked from 'package:base':
## 
##     format.pval, units
library(zebu) # Normalized Mutual Information
# library(minerva) # Maximum Information Coefficient
library(XICOR) # Chatterjee's Coefficient
# library(dHSIC) # Hilbert Schmidt Independence Criterion
library(VineCopula) # Blomqvist's Beta

make_cormat <- function(dat_mat){
  # Compute eight pairwise dependence-measure matrices over the columns of
  # dat_mat (observations x variables) and return them as a list, in order:
  #   1 Pearson, 2 Spearman, 3 Kendall (pcaPP::cor.fk),
  #   4 Hoeffding's D (Hmisc::hoeffd), 5 Blomqvist's beta (VineCopula::BetaMatrix),
  #   6 normalized mutual information (zebu::lassie),
  #   7 distance correlation (energy::dcor2d), 8 Chatterjee's xi (XICOR::calculateXI).
  # Each matrix keeps only the strict lower triangle; the diagonal and upper
  # triangle are set to NA so that quantile()/plot() summaries count each
  # pair exactly once.
  cor_mat_list <- list()

  # Helper: blank out the diagonal and upper triangle of a square matrix.
  mask_upper <- function(m){
    m[upper.tri(m, diag = TRUE)] <- NA
    m
  }

  # Pearson and Spearman via stats::cor on the whole matrix.
  basic_cor <- c("pearson", "spearman")
  for (i in 1:2){
    print(i) # progress indicator
    cor_mat_list[[i]] <- mask_upper(stats::cor(dat_mat, method = basic_cor[i]))
  }

  # Measures whose implementations accept a whole matrix/data.frame.
  no_loop_function <- list(pcaPP::cor.fk, Hmisc::hoeffd,
                           VineCopula::BetaMatrix)
  for (i in 3:5){
    print(i)
    fun <- no_loop_function[[i - 2]]
    cor_mat <- fun(dat_mat)
    if (i == 4){ # Hmisc::hoeffd returns a list; the D matrix is in $D
      cor_mat <- cor_mat$D
    }
    cor_mat_list[[i]] <- mask_upper(cor_mat)
  }

  # Measures whose implementations only take two variables at a time,
  # so we fill the lower triangle pair by pair.
  need_loop <- list(zebu::lassie, energy::dcor2d, XICOR::calculateXI)

  for (i in 6:8){
    print(i)
    fun <- need_loop[[i - 5]]

    cor_mat <- matrix(nrow = ncol(dat_mat),
                      ncol = ncol(dat_mat))

    for (j in 2:ncol(dat_mat)){
      for (k in 1:(j - 1)){
        if (i == 6){ # zebu::lassie needs a 2-column input plus binning options
          cor_mat[j, k] <- fun(cbind(dat_mat[, j], dat_mat[, k]),
                               continuous = c(1, 2), breaks = 6,
                               measure = "npmi")$global
        } else {
          cor_mat[j, k] <- fun(as.numeric(dat_mat[, j]),
                               as.numeric(dat_mat[, k]))
        }
      }
    }

    cor_mat_list[[i]] <- mask_upper(cor_mat)
  }
  return(cor_mat_list)
}

draw_heatmap <- function(cor_mat){
    # Render a (lower-triangular, NA-masked) coefficient matrix as a
    # discretized ggplot2 heatmap. Values are binned into len - 1 = 4
    # quantile-based intervals so that each color covers roughly the same
    # number of cells, then colored with a reversed viridis palette.
    # Returns the ggplot object (caller adds titles via labs()).
    len <- 5
    melted_cormat <- melt(cor_mat)
    melted_cormat <- melted_cormat[!is.na(melted_cormat$value),]
    break_vec <- round(as.numeric(quantile(melted_cormat$value,
                                           probs = seq(0, 1, length.out = len),
                                           na.rm = TRUE)),
                       4)
    # Widen the outer breaks so the observed min/max fall strictly inside
    # the first/last bin (cut() uses open lower bounds).
    break_vec[1] <- break_vec[1]-1
    break_vec[len] <- break_vec[len]+1
    melted_cormat$value <- cut(melted_cormat$value, breaks = break_vec)
    heatmap_color <- unique(melted_cormat$value)
  
    heatmap <- ggplot(data = melted_cormat, aes(x = Var2, y = Var1, fill = value))+
      geom_tile(colour = "Black") +
      ggplot2::scale_fill_manual(breaks = sort(heatmap_color), 
                                 values = rev(scales::viridis_pal(begin = 0, end = 1)
                                              (length(heatmap_color)))) +
      theme_bw() + # make the background white
      theme(panel.border = element_blank(), panel.grid.major = element_blank(),
            panel.grid.minor = element_blank(), axis.ticks = element_blank(),
            # erase tick marks and labels
            axis.text.x = element_blank(), axis.text.y = element_blank())
    
    return (heatmap)
}

make_cor_heatmap <- function(dat_mat, cor_mat_list){
  # Build two parallel lists of ggplot heatmaps for the 8 dependence
  # measures in cor_mat_list (order must match make_cormat()):
  #   [[1]] heatmaps of the raw coefficients,
  #   [[2]] heatmaps of the absolute coefficients.
  # Returns list(cor_heatmap_list, cor_abs_heatmap_list).
  fun_label <- c("Pearson's Correlation", "Spearman's Correlation", "Kendall's Correlation",
                 "Hoeffding's D", "Blomqvist's Beta", "NMI",
                 "Distance Correlation", "XI Correlation")
  
  cor_heatmap_list <- list()
  cor_abs_heatmap_list <- list()
  
  # make correlation matrices
  #cor_mat_list <- make_cormat(dat_mat)
  
  for (i in 1:8){
    print(i) # progress indicator
    # BUG FIX: previously this took abs(cor_mat_list[[i]]), which made the
    # "non-abs" heatmap identical to the abs heatmap below. Use the raw
    # (signed) coefficients here.
    cor_mat <- cor_mat_list[[i]]
    
    # get heatmaps
    cor_heatmap <- draw_heatmap(cor_mat)
    
    # ggplot labels
    ggplot_labs <- labs(title = paste("Heatmap of", fun_label[i]),
                      x = "",
                      y = "",
                      fill = "Coefficient") # change the title and legend label
      
    cor_heatmap_list[[i]] <- cor_heatmap + ggplot_labs
    
    # NOTE(review): this index set treats 5 (Blomqvist, which can be
    # negative) as sign-free and 6 (NMI, which is non-negative) as signed;
    # confirm {1,2,3,4,6} is the intended set.
    if (i %in% c(1,2,3,4,6)){
      # Signed measure: redraw from absolute values.
      cor_abs_mat <- abs(cor_mat_list[[i]])
      cor_abs_heatmap <- draw_heatmap(cor_abs_mat)
      ggplot_abs_labs <- labs(title = paste("Abs Heatmap of", fun_label[i]),
                              x = "", # change the title and legend label
                              y = "", 
                              fill = "Coefficient") 
      cor_abs_heatmap_list[[i]] <- cor_abs_heatmap + ggplot_abs_labs
    } else {
      # Non-negative measure: the abs heatmap equals the non-abs one,
      # so reuse it and only change the labels.
      ggplot_abs_labs <- labs(title = paste("Abs Heatmap of", fun_label[i]),
                              subtitle = "Equivalent to Non-Abs Heatmap",
                              x = "", # change the title and legend label
                              y = "", 
                              fill = "Coefficient") 
      cor_abs_heatmap_list[[i]] <- cor_heatmap + ggplot_abs_labs
    }
  }
  
  ans <- list(cor_heatmap_list, cor_abs_heatmap_list)
  
  return (ans)
}
# Load the precomputed correlation matrices and heatmaps (see the
# commented-out make_cormat()/make_cor_heatmap() calls below for how they
# were produced, and the save() call for what the file contains).
load("PCA_seurat_corr2.RData")
# save(cormat_list, heatmap_list, sub_dat, sub_celltype, store_date, file = "PCA_seurat_corr2.RData")

# cormat_list <- make_cormat(sub_dat)
# heatmap_list <- make_cor_heatmap(sub_dat, cormat_list)

# Unpack; the order matches make_cormat():
# 1 Pearson, 2 Spearman, 3 Kendall, 4 Hoeffding's D, 5 Blomqvist's beta,
# 6 normalized mutual information, 7 distance correlation, 8 Chatterjee's xi.
cor_pearson_mat <- cormat_list[[1]]; cor_spearman_mat <- cormat_list[[2]];
cor_kendall_mat <- cormat_list[[3]]; cor_hoeffd_mat <- cormat_list[[4]];
cor_blomqvist_mat <- cormat_list[[5]]; cor_MI_mat <- cormat_list[[6]];
cor_dist_mat <- cormat_list[[7]]; cor_XI_mat <- cormat_list[[8]];

1. Pearson’s correlation coefficient

  • Pearson’s correlation is to measure linear dependency of data, X and Y
  • \(-1 \leq \rho_{Pearson}(X, Y) \leq 1\)
  • \(\rho_{Pearson}(X, Y) = \frac{\sum(x_i-\bar{x})(y_i -\bar{y})}{\sqrt{\sum(x_i-\bar{x})^2}\sqrt{\sum(y_i -\bar{y})^2}}\)
cor_pearson_mat[1:5,1:5]
##            HBB      HBA1       CA2      MYL4 HMBS
## HBB         NA        NA        NA        NA   NA
## HBA1 0.9802899        NA        NA        NA   NA
## CA2  0.8855958 0.9015420        NA        NA   NA
## MYL4 0.8827101 0.8962023 0.9884437        NA   NA
## HMBS 0.8838557 0.8991237 0.9833718 0.9713701   NA
quantile(cor_pearson_mat, na.rm = TRUE)
##          0%         25%         50%         75%        100% 
## -0.62520607 -0.03468622  0.12321020  0.56398351  0.99752531
quantile(abs(cor_pearson_mat), na.rm = TRUE)
##           0%          25%          50%          75%         100% 
## 8.153488e-07 6.515312e-02 1.713022e-01 5.640908e-01 9.975253e-01
# plot all |correlation| values, sorted in decreasing order
cor_pearson_vec <- sort(abs(cor_pearson_mat), decreasing = TRUE)
plot(cor_pearson_vec)

# plot the 4 gene pairs with the highest |correlation|
par(mfrow = c(2,2))
for(i in 1:4){
 # which() returns one (row, col) pair per matching entry; when values tie
 # it has several rows, so take the first row explicitly (idx[1]/idx[2]
 # would both be row indices in that case).
 idx <- which(abs(cor_pearson_mat) == cor_pearson_vec[i], arr.ind = TRUE)
 idx1 <- idx[1, 1]; idx2 <- idx[1, 2]
 
 plot(sub_dat[,idx1], sub_dat[,idx2], col = sub_cluster_labels, asp = TRUE,
      pch = 16, xlab = paste0(colnames(sub_dat)[idx1], ", (", idx1, ")"),
      ylab = paste0(colnames(sub_dat)[idx2], ", (", idx2, ")"), 
      main = paste0("Correlation of ", round(cor_pearson_mat[idx1, idx2], 3)))
}

# plot the 4 gene pairs with the lowest |correlation|
par(mfrow = c(2,2))
for(i in 1:4){
 idx <- which(abs(cor_pearson_mat) == rev(cor_pearson_vec)[i], arr.ind = TRUE)
 idx1 <- idx[1, 1]; idx2 <- idx[1, 2]
 
 plot(sub_dat[,idx1], sub_dat[,idx2], col = sub_cluster_labels, asp = TRUE,
      pch = 16, xlab = paste0(colnames(sub_dat)[idx1], ", (", idx1, ")"),
      ylab = paste0(colnames(sub_dat)[idx2], ", (", idx2, ")"), 
      main = paste0("Correlation of ", round(cor_pearson_mat[idx1, idx2], 3)))
}

Heatmap

heatmap_list[[1]][[1]]

2. Spearman’s correlation coefficient

  • It captures the monotonic relationship between data, X and Y
  • \(-1 \leq \rho_{Spearman}(X,Y) \leq 1\)
  • \(\rho_{Spearman} = 1 - \frac{6\sum{d_i^2}}{n(n^2-1)}\) where \(d_i\) is the difference between the ranks of \(x_i\) and \(y_i\)
cor_spearman_mat[1:5,1:5]
##             HBB       HBA1       CA2     MYL4 HMBS
## HBB          NA         NA        NA       NA   NA
## HBA1 0.90608212         NA        NA       NA   NA
## CA2  0.22855902 0.18667074        NA       NA   NA
## MYL4 0.02741376 0.09957723 0.6945166       NA   NA
## HMBS 0.22632275 0.30332035 0.5970641 0.446277   NA
quantile(cor_spearman_mat, na.rm = TRUE)
##         0%        25%        50%        75%       100% 
## -0.7084475 -0.0495692  0.0613620  0.1862800  0.9884195
quantile(abs(cor_spearman_mat), na.rm = TRUE)
##           0%          25%          50%          75%         100% 
## 1.502738e-06 5.586818e-02 1.195224e-01 2.129959e-01 9.884195e-01
# plot all |correlation| values, sorted in decreasing order
cor_spearman_vec <- sort(abs(cor_spearman_mat), decreasing = TRUE)
plot(cor_spearman_vec)

# plot the 4 gene pairs with the highest |correlation|
par(mfrow = c(2,2))
for(i in 1:4){
 # take the first (row, col) match; with ties, which() returns several rows
 idx <- which(abs(cor_spearman_mat) == cor_spearman_vec[i], arr.ind = TRUE)
 idx1 <- idx[1, 1]; idx2 <- idx[1, 2]
 
 plot(sub_dat[,idx1], sub_dat[,idx2], col = sub_cluster_labels, asp = TRUE,
      pch = 16, xlab = paste0(colnames(sub_dat)[idx1], ", (", idx1, ")"),
      ylab = paste0(colnames(sub_dat)[idx2], ", (", idx2, ")"), 
      main = paste0("Correlation of ", round(cor_spearman_mat[idx1, idx2], 3)))
}

# plot the 4 gene pairs with the lowest |correlation|
par(mfrow = c(2,2))
for(i in 1:4){
 idx <- which(abs(cor_spearman_mat) == rev(cor_spearman_vec)[i], arr.ind = TRUE)
 idx1 <- idx[1, 1]; idx2 <- idx[1, 2]
 
 plot(sub_dat[,idx1], sub_dat[,idx2], col = sub_cluster_labels, asp = TRUE,
      pch = 16, xlab = paste0(colnames(sub_dat)[idx1], ", (", idx1, ")"),
      ylab = paste0(colnames(sub_dat)[idx2], ", (", idx2, ")"), 
      main = paste0("Correlation of ", round(cor_spearman_mat[idx1, idx2], 3)))
}

Heatmap

heatmap_list[[1]][[2]]

3. Kendall’s Tau

  • It is an alternative method to Spearman’s correlations, identifying monotonic relationships.
  • \(-1 \leq \rho_{Kendall}(X,Y) \leq 1\)
  • \(\rho_{Kendall}(X,Y) = \frac{\#\;concordant\;pairs - \#\;discordant \;pairs}{0.5n(n-1)}\)
cor_kendall_mat[1:5,1:5]
##             HBB       HBA1       CA2     MYL4 HMBS
## HBB          NA         NA        NA       NA   NA
## HBA1 0.74883220         NA        NA       NA   NA
## CA2  0.16040207 0.13177256        NA       NA   NA
## MYL4 0.02070151 0.06846753 0.5171020       NA   NA
## HMBS 0.16115881 0.21440825 0.4366613 0.323007   NA
quantile(cor_kendall_mat, na.rm = TRUE)
##          0%         25%         50%         75%        100% 
## -0.49295752 -0.03455568  0.04087806  0.12781749  0.91723356
quantile(abs(cor_kendall_mat), na.rm = TRUE)
##         0%        25%        50%        75%       100% 
## 0.00000000 0.03806701 0.08152633 0.14660931 0.91723356
# plot all |correlation| values, sorted in decreasing order
cor_kendall_vec <- sort(abs(cor_kendall_mat), decreasing = TRUE)
plot(cor_kendall_vec)

# plot the 4 gene pairs with the highest |correlation|
par(mfrow = c(2,2))
for(i in 1:4){
 # take the first (row, col) match; ties (e.g. the exact 0s at the low end
 # of this matrix) make which() return several rows
 idx <- which(abs(cor_kendall_mat) == cor_kendall_vec[i], arr.ind = TRUE)
 idx1 <- idx[1, 1]; idx2 <- idx[1, 2]
 
 plot(sub_dat[,idx1], sub_dat[,idx2], col = sub_cluster_labels, asp = TRUE,
      pch = 16, xlab = paste0(colnames(sub_dat)[idx1], ", (", idx1, ")"),
      ylab = paste0(colnames(sub_dat)[idx2], ", (", idx2, ")"), 
      main = paste0("Correlation of ", round(cor_kendall_mat[idx1, idx2], 3)))
}

# plot the 4 gene pairs with the lowest |correlation|
par(mfrow = c(2,2))
for(i in 1:4){
 idx <- which(abs(cor_kendall_mat) == rev(cor_kendall_vec)[i], arr.ind = TRUE)
 idx1 <- idx[1, 1]; idx2 <- idx[1, 2]
 
 plot(sub_dat[,idx1], sub_dat[,idx2], col = sub_cluster_labels, asp = TRUE,
      pch = 16, xlab = paste0(colnames(sub_dat)[idx1], ", (", idx1, ")"),
      ylab = paste0(colnames(sub_dat)[idx2], ", (", idx2, ")"), 
      main = paste0("Correlation of ", round(cor_kendall_mat[idx1, idx2], 3)))
}

Heatmap

heatmap_list[[1]][[3]]

4. Hoeffding’s D

  • It tests the independence of data by calculating the distance between the product of the marginal distributions under the null hypothesis and the empirical bi-variate distribution.
  • \(-0.5 \leq D(X,Y) \leq 1\)
  • \(D(X,Y) = \frac{(n-2)(n-3)D_1+D_2-2(n-2)D_3}{n(n-1)(n-2)(n-3)(n-4)}\)
    • \(D_1 = \sum_{i=1}^{n} Q_i(Q_i-1)\)
    • \(D_2 = \sum_{i=1}^{n} (R_i-1)(R_i-2)(S_j-1)(S_j-2)\)
    • \(D_3 = \sum_{i=1}^{n} (R_i-2)(S_i-2)Q_i\)
cor_hoeffd_mat[1:5,1:5]
##              HBB       HBA1       CA2       MYL4 HMBS
## HBB           NA         NA        NA         NA   NA
## HBA1 0.434728412         NA        NA         NA   NA
## CA2  0.020137266 0.01293066        NA         NA   NA
## MYL4 0.005648109 0.01021485 0.1893165         NA   NA
## HMBS 0.021018452 0.04197650 0.1294001 0.08287349   NA
quantile(cor_hoeffd_mat, na.rm = TRUE)
##           0%          25%          50%          75%         100% 
## 0.0004798045 0.0063848304 0.0108658497 0.0205007125 0.7607994473
# plot all |D| values, sorted in decreasing order
cor_hoeffd_vec <- sort(abs(cor_hoeffd_mat), decreasing = TRUE)
plot(cor_hoeffd_vec)

# plot the 4 gene pairs with the highest |D|
par(mfrow = c(2,2))
for(i in 1:4){
 # take the first (row, col) match; with ties, which() returns several rows
 idx <- which(abs(cor_hoeffd_mat) == cor_hoeffd_vec[i], arr.ind = TRUE)
 idx1 <- idx[1, 1]; idx2 <- idx[1, 2]
 
 plot(sub_dat[,idx1], sub_dat[,idx2], col = sub_cluster_labels, asp = TRUE,
      pch = 16, xlab = paste0(colnames(sub_dat)[idx1], ", (", idx1, ")"),
      ylab = paste0(colnames(sub_dat)[idx2], ", (", idx2, ")"), 
      main = paste0("Correlation of ", round(cor_hoeffd_mat[idx1, idx2], 3)))
}

# plot the 4 gene pairs with the lowest |D|
par(mfrow = c(2,2))
for(i in 1:4){
 idx <- which(abs(cor_hoeffd_mat) == rev(cor_hoeffd_vec)[i], arr.ind = TRUE)
 idx1 <- idx[1, 1]; idx2 <- idx[1, 2]
 
 plot(sub_dat[,idx1], sub_dat[,idx2], col = sub_cluster_labels, asp = TRUE,
      pch = 16, xlab = paste0(colnames(sub_dat)[idx1], ", (", idx1, ")"),
      ylab = paste0(colnames(sub_dat)[idx2], ", (", idx2, ")"), 
      main = paste0("Correlation of ", round(cor_hoeffd_mat[idx1, idx2], 3)))
}

Heatmap

heatmap_list[[1]][[4]]

5. Blomqvist’s Beta

  • It measures dependency between variables by constructing a two-way contingency table with the medians of each margin as cutting points.
  • \(-1 \leq \beta \leq 1\)
  • \(\beta_n = \frac{n_1-n_2}{n_1+n_2} = \frac{2n_1}{n_1+n_2} - 1\)
  • \(\beta = P\{(X-\tilde{x})(Y-\tilde{y})>0\} - P\{(X-\tilde{x})(Y-\tilde{y}) < 0\}\)
cor_blomqvist_mat[1:5,1:5]
##           [,1]      [,2]      [,3]      [,4] [,5]
## [1,]        NA        NA        NA        NA   NA
## [2,] 0.9425743        NA        NA        NA   NA
## [3,] 0.9346535 0.9643564        NA        NA   NA
## [4,] 0.9346535 0.9643564 1.0000000        NA   NA
## [5,] 0.8811881 0.9108911 0.9386139 0.9386139   NA
quantile(cor_blomqvist_mat, na.rm = TRUE)
##         0%        25%        50%        75%       100% 
## 0.03366337 0.79801980 0.87326733 0.92673267 1.00000000
quantile(abs(cor_blomqvist_mat), na.rm = TRUE)
##         0%        25%        50%        75%       100% 
## 0.03366337 0.79801980 0.87326733 0.92673267 1.00000000
# plot all |beta| values, sorted in decreasing order
cor_blomqvist_vec <- sort(abs(cor_blomqvist_mat), decreasing = TRUE)
plot(cor_blomqvist_vec)

# plot the 4 gene pairs with the highest |beta|
par(mfrow = c(2,2))
for(i in 1:4){
 idx <- which(abs(cor_blomqvist_mat) == cor_blomqvist_vec[i], arr.ind = TRUE)
 # Blomqvist's beta takes many tied values here, so which() usually returns
 # several (row, col) pairs; walk through them to show distinct pairs, but
 # guard against running past the number of matches.
 r <- min(i, nrow(idx))
 idx1 <- idx[r, 1]; idx2 <- idx[r, 2]
 
 plot(sub_dat[,idx1], sub_dat[,idx2], col = sub_cluster_labels, asp = TRUE,
      pch = 16, xlab = paste0(colnames(sub_dat)[idx1], ", (", idx1, ")"),
      ylab = paste0(colnames(sub_dat)[idx2], ", (", idx2, ")"), 
      main = paste0("Correlation of ", round(cor_blomqvist_mat[idx1, idx2], 3)))
}

# plot the 4 gene pairs with the lowest |beta|
par(mfrow = c(2,2))
for(i in 1:4){
 idx <- which(abs(cor_blomqvist_mat) == rev(cor_blomqvist_vec)[i], arr.ind = TRUE)
 idx1 <- idx[1, 1]; idx2 <- idx[1, 2]
 
 plot(sub_dat[,idx1], sub_dat[,idx2], col = sub_cluster_labels, asp = TRUE,
      pch = 16, xlab = paste0(colnames(sub_dat)[idx1], ", (", idx1, ")"),
      ylab = paste0(colnames(sub_dat)[idx2], ", (", idx2, ")"), 
      main = paste0("Correlation of ", round(cor_blomqvist_mat[idx1, idx2], 3)))
}

Heatmap

heatmap_list[[1]][[5]]

6. Normalized Mutual Information

  • It measures how much one random variable gives information about the other; for example, high mutual information indicates a large reduction in uncertainty.
  • \(0 \leq MI(X,Y) \leq 1\), as it is normalized.
  • \(MI(X,Y) = \int\!\!\int f_{X,Y}(x,y) \log_2 \frac{f_{X,Y}(x,y)}{f_X(x)f_Y(y)} \; dx\,dy\)
  • \(MI(X,Y) = \sum \sum p_{X,Y} (x,y) log \frac{p_{X,Y} (x,y)}{P_X(x)P_Y(y)}\)
cor_MI_mat[1:5,1:5]
##           [,1]      [,2]     [,3]      [,4] [,5]
## [1,]        NA        NA       NA        NA   NA
## [2,] 0.8105496        NA       NA        NA   NA
## [3,] 0.9993603 0.9990028       NA        NA   NA
## [4,] 0.9480882 0.9466026 0.997124        NA   NA
## [5,] 0.5896230 0.5881938 1.000000 0.9465458   NA
quantile(cor_MI_mat, na.rm = TRUE)
##          0%         25%         50%         75%        100% 
## 0.008393862 0.151541806 0.267994563 0.523874305 1.000000000
# plot all NMI values, sorted in decreasing order
cor_MI_vec <- sort(abs(cor_MI_mat), decreasing = TRUE)
plot(cor_MI_vec)

# plot the 4 gene pairs with the highest NMI
par(mfrow = c(2,2))
for(i in 1:4){
 # take the first (row, col) match; with ties (e.g. the exact 1.000000
 # entries above), which() returns several rows
 idx <- which(abs(cor_MI_mat) == cor_MI_vec[i], arr.ind = TRUE)
 idx1 <- idx[1, 1]; idx2 <- idx[1, 2]
 
 plot(sub_dat[,idx1], sub_dat[,idx2], col = sub_cluster_labels, asp = TRUE,
      pch = 16, xlab = paste0(colnames(sub_dat)[idx1], ", (", idx1, ")"),
      ylab = paste0(colnames(sub_dat)[idx2], ", (", idx2, ")"), 
      main = paste0("Correlation of ", round(cor_MI_mat[idx1, idx2], 3)))
}

# plot the 4 gene pairs with the lowest NMI
par(mfrow = c(2,2))
for(i in 1:4){
 idx <- which(abs(cor_MI_mat) == rev(cor_MI_vec)[i], arr.ind = TRUE)
 idx1 <- idx[1, 1]; idx2 <- idx[1, 2]
 
 plot(sub_dat[,idx1], sub_dat[,idx2], col = sub_cluster_labels, asp = TRUE,
      pch = 16, xlab = paste0(colnames(sub_dat)[idx1], ", (", idx1, ")"),
      ylab = paste0(colnames(sub_dat)[idx2], ", (", idx2, ")"), 
      main = paste0("Correlation of ", round(cor_MI_mat[idx1, idx2], 3)))
}

Heatmap

heatmap_list[[1]][[6]]

7. Distance Correlation

  • It is a measure that identifies non-linear relationships between two random variables using energy distances.
  • distance correlation is calculated by dividing the distance covariance between X and Y by the product of their distance standard deviations.
  • \(0 \leq dCor \leq 1\)
  • \(dCor(X,Y) = \frac{dCov(X,Y)}{\sqrt{dVar(X)dVar(Y)}}\)
    • \(dCov(X, Y) = \sqrt{\frac{1}{n^2} \sum_{k=1, l=1}^{n} A_{k,l}B_{k,l}}\)
    • \(dVar(X) = dCov(X,X)\) and \(dVar(Y) = dCov(Y, Y)\)
cor_dist_mat[1:5,1:5]
##           [,1]      [,2]      [,3]      [,4] [,5]
## [1,]        NA        NA        NA        NA   NA
## [2,] 0.9330406        NA        NA        NA   NA
## [3,] 0.7352955 0.7492927        NA        NA   NA
## [4,] 0.7247663 0.7378265 0.9384940        NA   NA
## [5,] 0.7586371 0.7775185 0.9163427 0.8819946   NA
quantile(cor_dist_mat, na.rm = TRUE)
##          0%         25%         50%         75%        100% 
## 0.006331434 0.037984547 0.081385895 0.339809634 0.994815408
# plot all distance-correlation values, sorted in decreasing order
cor_dist_vec <- sort(abs(cor_dist_mat), decreasing = TRUE)
plot(cor_dist_vec)

# plot the 4 gene pairs with the highest distance correlation
par(mfrow = c(2,2))
for(i in 1:4){
 # take the first (row, col) match; with ties, which() returns several rows
 idx <- which(abs(cor_dist_mat) == cor_dist_vec[i], arr.ind = TRUE)
 idx1 <- idx[1, 1]; idx2 <- idx[1, 2]
 
 plot(sub_dat[,idx1], sub_dat[,idx2], col = sub_cluster_labels, asp = TRUE,
      pch = 16, xlab = paste0(colnames(sub_dat)[idx1], ", (", idx1, ")"),
      ylab = paste0(colnames(sub_dat)[idx2], ", (", idx2, ")"), 
      main = paste0("Correlation of ", round(cor_dist_mat[idx1, idx2], 3)))
}

# plot the 4 gene pairs with the lowest distance correlation
par(mfrow = c(2,2))
for(i in 1:4){
 idx <- which(abs(cor_dist_mat) == rev(cor_dist_vec)[i], arr.ind = TRUE)
 idx1 <- idx[1, 1]; idx2 <- idx[1, 2]
 
 plot(sub_dat[,idx1], sub_dat[,idx2], col = sub_cluster_labels, asp = TRUE,
      pch = 16, xlab = paste0(colnames(sub_dat)[idx1], ", (", idx1, ")"),
      ylab = paste0(colnames(sub_dat)[idx2], ", (", idx2, ")"), 
      main = paste0("Correlation of ", round(cor_dist_mat[idx1, idx2], 3)))
}

Heatmap

heatmap_list[[1]][[7]]

8. Chatterjee’s XI Correlation

  • It measures the degree of dependence between the variables with concept of rank.
  • \(0 \leq \xi_n \leq 1\)
  • \(\xi_n(X,Y) = 1 - \frac{3\sum_{i=1}^{n-1} |r_{i+1} -r_i|}{n^2-1}\)
  • \(\xi_n(X,Y) = 1 - \frac{n\sum_{i=1}^{n-1}|r_{i+1}-r_i|}{2\sum_{i=1}^{n}l_i(n-l_i)}\)
cor_XI_mat[1:5,1:5]
##           [,1]      [,2]      [,3]      [,4] [,5]
## [1,]        NA        NA        NA        NA   NA
## [2,] 0.9241558        NA        NA        NA   NA
## [3,] 0.8196740 0.8137661        NA        NA   NA
## [4,] 0.8018141 0.7990438 0.8531820        NA   NA
## [5,] 0.8072044 0.8091580 0.8297245 0.8147871   NA
quantile(cor_XI_mat, na.rm = TRUE)
##        0%       25%       50%       75%      100% 
## 0.7377463 0.7833977 0.7922360 0.8018530 0.9679241
# plot all xi values, sorted in decreasing order
cor_XI_vec <- sort(abs(cor_XI_mat), decreasing = TRUE)
plot(cor_XI_vec)

# plot the 4 gene pairs with the highest xi
par(mfrow = c(2,2))
for(i in 1:4){
 # take the first (row, col) match; with ties, which() returns several rows
 idx <- which(abs(cor_XI_mat) == cor_XI_vec[i], arr.ind = TRUE)
 idx1 <- idx[1, 1]; idx2 <- idx[1, 2]
 
 plot(sub_dat[,idx1], sub_dat[,idx2], col = sub_cluster_labels, asp = TRUE,
      pch = 16, xlab = paste0(colnames(sub_dat)[idx1], ", (", idx1, ")"),
      ylab = paste0(colnames(sub_dat)[idx2], ", (", idx2, ")"), 
      main = paste0("Correlation of ", round(cor_XI_mat[idx1, idx2], 3)))
}

# plot the 4 gene pairs with the lowest xi
par(mfrow = c(2,2))
for(i in 1:4){
 idx <- which(abs(cor_XI_mat) == rev(cor_XI_vec)[i], arr.ind = TRUE)
 idx1 <- idx[1, 1]; idx2 <- idx[1, 2]
 
 plot(sub_dat[,idx1], sub_dat[,idx2], col = sub_cluster_labels, asp = TRUE,
      pch = 16, xlab = paste0(colnames(sub_dat)[idx1], ", (", idx1, ")"),
      ylab = paste0(colnames(sub_dat)[idx2], ", (", idx2, ")"), 
      main = paste0("Correlation of ", round(cor_XI_mat[idx1, idx2], 3)))
}

Heatmap

heatmap_list[[1]][[8]]

# Summarize each dependence measure by the 10th and 95th percentile of the
# absolute coefficients (vapply avoids growing the matrix with rbind in a loop).
quantile_mat <- t(vapply(cormat_list,
                         function(m) quantile(abs(m), probs = c(0.10, 0.95),
                                              na.rm = TRUE),
                         numeric(2)))
# Row labels must follow the order in which make_cormat() fills cormat_list:
# slot 6 is NMI (zebu::lassie) and slot 7 is distance correlation
# (energy::dcor2d) — the previous labels had these two swapped.
rownames(quantile_mat) <- c("Pearson", "Spearman", "Kendall", "Hoeffding's D",
                            "Blomqvist's Beta", "NMI", "Dist. Corr", "XI Corr")

quantile_mat
## NOTE: the output below was rendered before the label fix — the rows
## printed as "Dist. Corr" and "NMI" actually hold the NMI and distance
## correlation quantiles, respectively.
##                          10%        95%
## Pearson          0.024924626 0.84456876
## Spearman         0.022152044 0.42096946
## Kendall          0.014909052 0.29747580
## Hoeffding's D    0.004243312 0.06489647
## Blomqvist's Beta 0.682970297 0.98415842
## Dist. Corr       0.096852083 0.88140118
## NMI              0.025042845 0.64849550
## XI Corr          0.775707498 0.81991138
# save(quantile_mat, store_date, file = "PCA_seurat_quantile2.RData")